unsigned long max_page;
unsigned long total_pages;
+unsigned long __read_mostly pdx_group_valid[BITS_TO_LONGS(
+ (FRAMETABLE_SIZE / sizeof(*frame_table) + PDX_GROUP_COUNT - 1)
+ / PDX_GROUP_COUNT)] = { [0] = 1 };
+
#define PAGE_CACHE_ATTRS (_PAGE_PAT|_PAGE_PCD|_PAGE_PWT)
int opt_allow_hugepage;
#define l3_disallow_mask(d) L3_DISALLOW_MASK
#endif
-void __init init_frametable(void)
+/*
+ * Map backing memory for, and then zero, the frame-table virtual address
+ * range [start, end).  start must be aligned to an L2 superpage boundary
+ * (asserted below); end need not be, in which case the mapped-but-unused
+ * tail beyond end is filled with all-ones as poison.
+ */
+static void __init init_frametable_chunk(void *start, void *end)
{
- unsigned long nr_pages, page_step, i, mfn;
-
-#ifdef __x86_64__
- BUILD_BUG_ON(FRAMETABLE_VIRT_START & ((1UL << L3_PAGETABLE_SHIFT) - 1));
- BUILD_BUG_ON(XEN_VIRT_END > FRAMETABLE_VIRT_END);
-#else
- BUILD_BUG_ON(FRAMETABLE_VIRT_START & ((1UL << L2_PAGETABLE_SHIFT) - 1));
-#endif
-
- nr_pages = PFN_UP(max_pdx * sizeof(*frame_table));
- page_step = 1 << (cpu_has_page1gb ? L3_PAGETABLE_SHIFT - PAGE_SHIFT
- : L2_PAGETABLE_SHIFT - PAGE_SHIFT);
+ unsigned long s = (unsigned long)start;
+ unsigned long e = (unsigned long)end;
unsigned long step, mfn;
- for ( i = 0; i < nr_pages; i += page_step )
+ /* Chunks must start on an L2 superpage boundary. */
+ ASSERT(!(s & ((1 << L2_PAGETABLE_SHIFT) - 1)));
+ for ( ; s < e; s += step << PAGE_SHIFT )
{
+ /* Prefer an L3 (1Gb) mapping when supported and s is suitably aligned,
+ otherwise fall back to an L2 mapping. */
+ step = 1UL << (cpu_has_page1gb &&
+ !(s & ((1UL << L3_PAGETABLE_SHIFT) - 1)) ?
+ L3_PAGETABLE_SHIFT - PAGE_SHIFT :
+ L2_PAGETABLE_SHIFT - PAGE_SHIFT);
/*
* The hardcoded 4 below is arbitrary - just pick whatever you think
* is reasonable to waste as a trade-off for using a large page.
*/
- while (nr_pages + 4 - i < page_step)
- page_step >>= PAGETABLE_ORDER;
- mfn = alloc_boot_pages(page_step, page_step);
- map_pages_to_xen(
- FRAMETABLE_VIRT_START + (i << PAGE_SHIFT),
- mfn, page_step, PAGE_HYPERVISOR);
+ while ( step && s + (step << PAGE_SHIFT) > e + (4 << PAGE_SHIFT) )
+ step >>= PAGETABLE_ORDER;
+ /* Retry with progressively smaller contiguous allocations on failure. */
+ do {
+ mfn = alloc_boot_pages(step, step);
+ } while ( !mfn && (step >>= PAGETABLE_ORDER) );
+ if ( !mfn )
+ panic("Not enough memory for frame table");
+ map_pages_to_xen(s, mfn, step, PAGE_HYPERVISOR);
}
- memset(frame_table, 0, nr_pages << PAGE_SHIFT);
+ /* Zero the requested range; s is now the actual end of the mapping,
+ so poison any over-mapped tail [end, s) with all-ones. */
+ memset(start, 0, end - start);
+ memset(end, -1, s - (unsigned long)end);
+}
+
+/*
+ * Set up the frame table sparsely: walk pdx_group_valid and map one
+ * frame-table chunk per run of consecutive valid PDX groups, leaving the
+ * ranges covering invalid (RAM-less) groups unmapped.
+ */
+void __init init_frametable(void)
+{
+ unsigned int sidx, eidx, nidx;
+ /* Number of PDX groups needed to cover [0, max_pdx). */
+ unsigned int max_idx = (max_pdx + PDX_GROUP_COUNT - 1) / PDX_GROUP_COUNT;
+
+#ifdef __x86_64__
+ BUILD_BUG_ON(XEN_VIRT_END > FRAMETABLE_VIRT_END);
+#endif
+ BUILD_BUG_ON(FRAMETABLE_VIRT_START & ((1UL << L2_PAGETABLE_SHIFT) - 1));
+
+ /* Group 0 is unconditionally valid, so starting at sidx = 0 is safe. */
+ for ( sidx = 0; ; sidx = nidx )
+ {
+ /* [sidx, eidx) is a run of valid groups; nidx is the next valid
+ group after that run (or >= max_idx if this is the last run). */
+ eidx = find_next_zero_bit(pdx_group_valid, max_idx, sidx);
+ nidx = find_next_bit(pdx_group_valid, max_idx, eidx);
+ if ( nidx >= max_idx )
+ break;
+ init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
+ pdx_to_page(eidx * PDX_GROUP_COUNT));
+ }
+ /* Final run: map up to and including the page entry for max_pdx - 1. */
+ init_frametable_chunk(pdx_to_page(sidx * PDX_GROUP_COUNT),
+ pdx_to_page(max_pdx - 1) + 1);
+}
void __init arch_init_memory(void)
#endif
}
+/*
+ * Mark as valid every PDX group overlapped by the MFN range [smfn, emfn)
+ * (emfn exclusive), so init_frametable() will map frame-table space for it.
+ * NOTE(review): assumes emfn > smfn - emfn == 0 would underflow in the
+ * emfn - 1 below; callers appear to skip empty ranges, but confirm.
+ */
+static void __init set_pdx_range(unsigned long smfn, unsigned long emfn)
+{
+ unsigned long idx, eidx;
+
+ /* idx: group containing the first pfn; eidx: one past the group
+ containing the last pfn of the range. */
+ idx = pfn_to_pdx(smfn) / PDX_GROUP_COUNT;
+ eidx = (pfn_to_pdx(emfn - 1) + PDX_GROUP_COUNT) / PDX_GROUP_COUNT;
+ for ( ; idx < eidx; ++idx )
+ __set_bit(idx, pdx_group_valid);
+}
+
/* A temporary copy of the e820 map that we can mess with during bootstrap. */
static struct e820map __initdata boot_e820;
if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) )
continue;
+ set_pdx_range(s >> PAGE_SHIFT, e >> PAGE_SHIFT);
+
/* Map the chunk. No memory will need to be allocated to do this. */
map_pages_to_xen(
(unsigned long)maddr_to_bootstrap_virt(s),
}
#endif
+ set_pdx_range(s >> PAGE_SHIFT, e >> PAGE_SHIFT);
+
/* Need to create mappings above 16MB. */
map_s = max_t(uint64_t, s, 16<<20);
map_e = e;
/* Physical address where Xen was relocated to. */
extern unsigned long xen_phys_start;
-extern unsigned long max_page, max_pdx;
+extern unsigned long max_pdx;
extern unsigned long pfn_pdx_bottom_mask, ma_va_bottom_mask;
extern unsigned int pfn_pdx_hole_shift;
extern unsigned long pfn_hole_mask;
#define pdx_to_virt(pdx) ((void *)(DIRECTMAP_VIRT_START + \
((unsigned long)(pdx) << PAGE_SHIFT)))
-static inline int __mfn_valid(unsigned long mfn)
-{
- return mfn < max_page && !(mfn & pfn_hole_mask);
-}
+extern int __mfn_valid(unsigned long mfn);
static inline unsigned long pfn_to_pdx(unsigned long pfn)
{